[IA64] Fix a VTi physical mode bug
author: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sun, 1 Oct 2006 17:19:45 +0000 (11:19 -0600)
committer: awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Sun, 1 Oct 2006 17:19:45 +0000 (11:19 -0600)
When the guest writes a region register while in physical mode, Xen must
not propagate the write to the machine region register if it is rr0 or
rr4 (those regions are overridden in metaphysical mode). Instead, cache
the translated value (metaphysical_saved_rr0/rr4) and apply it only when
the vCPU is in virtual mode.

Signed-off-by: Xuefei Xu <anthony.xu@intel.com>
xen/arch/ia64/vmx/vmx_phy_mode.c
xen/arch/ia64/vmx/vmx_vcpu.c

index 8745721d54394c815331017c53a03c1edc65da60..f86965abe1848c04de8fa0b2bf6b712278af811e 100644 (file)
@@ -126,10 +126,16 @@ void
 vmx_init_all_rr(VCPU *vcpu)
 {
        VMX(vcpu, vrr[VRN0]) = 0x38;
+       // enable vhpt in guest physical mode
+       vcpu->arch.metaphysical_rr0 |= 1;
+       vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
        VMX(vcpu, vrr[VRN1]) = 0x38;
        VMX(vcpu, vrr[VRN2]) = 0x38;
        VMX(vcpu, vrr[VRN3]) = 0x38;
        VMX(vcpu, vrr[VRN4]) = 0x38;
+       // enable vhpt in guest physical mode
+       vcpu->arch.metaphysical_rr4 |= 1;
+       vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
        VMX(vcpu, vrr[VRN5]) = 0x38;
        VMX(vcpu, vrr[VRN6]) = 0x38;
        VMX(vcpu, vrr[VRN7]) = 0x738;
@@ -141,11 +147,9 @@ void
 vmx_load_all_rr(VCPU *vcpu)
 {
        unsigned long psr;
-       ia64_rr phy_rr;
 
        local_irq_save(psr);
 
-
        /* WARNING: not allow co-exist of both virtual mode and physical
         * mode in same region
         */
@@ -154,24 +158,16 @@ vmx_load_all_rr(VCPU *vcpu)
                        panic_domain(vcpu_regs(vcpu),
                                     "Unexpected domain switch in phy emul\n");
                }
-               phy_rr.rrval = vcpu->arch.metaphysical_rr0;
-               //phy_rr.ps = PAGE_SHIFT;
-               phy_rr.ve = 1;
-
-               ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
+               ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
                ia64_dv_serialize_data();
-               phy_rr.rrval = vcpu->arch.metaphysical_rr4;
-               //phy_rr.ps = PAGE_SHIFT;
-               phy_rr.ve = 1;
-
-               ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
+               ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
                ia64_dv_serialize_data();
        } else {
                ia64_set_rr((VRN0 << VRN_SHIFT),
-                            vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
+                            vcpu->arch.metaphysical_saved_rr0);
                ia64_dv_serialize_data();
                ia64_set_rr((VRN4 << VRN_SHIFT),
-                            vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
+                            vcpu->arch.metaphysical_saved_rr4);
                ia64_dv_serialize_data();
        }
 
@@ -209,21 +205,11 @@ void
 switch_to_physical_rid(VCPU *vcpu)
 {
     UINT64 psr;
-    ia64_rr phy_rr, mrr;
-
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
-    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-    mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
-    phy_rr.ps = mrr.ps;
-    phy_rr.ve = 1;
-    ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
+    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
     ia64_srlz_d();
-    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-    mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
-    phy_rr.ps = mrr.ps;
-    phy_rr.ve = 1;
-    ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
+    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
     ia64_srlz_d();
 
     ia64_set_psr(psr);
@@ -236,15 +222,10 @@ void
 switch_to_virtual_rid(VCPU *vcpu)
 {
     UINT64 psr;
-    ia64_rr mrr;
-
     psr=ia64_clear_ic();
-
-    vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
     ia64_srlz_d();
-    vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
     ia64_srlz_d();
     ia64_set_psr(psr);
     ia64_srlz_i();
index 3d9c65a7f0c1daa71002f813ee3a1d8d744ddcd2..ea78850a2fee5fde97ac6352ad825ca28a5bd329 100644 (file)
@@ -212,19 +212,32 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
 {
     ia64_rr oldrr,newrr;
     extern void * pal_vaddr;
+    u64 rrval;
 
     vcpu_get_rr(vcpu, reg, &oldrr.rrval);
     newrr.rrval=val;
     if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
         panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
 
-    VMX(vcpu,vrr[reg>>61]) = val;
-    switch((u64)(reg>>61)) {
+    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
+    switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
         (void *)vcpu->arch.privregs,
         (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        break;
+    case VRN4:
+        rrval = vrrtomrr(vcpu,val);
+        vcpu->arch.metaphysical_saved_rr4 = rrval;
+        if (!is_physical_mode(vcpu))
+            ia64_set_rr(reg,rrval);
+        break;
+    case VRN0:
+        rrval = vrrtomrr(vcpu,val);
+        vcpu->arch.metaphysical_saved_rr0 = rrval;
+        if (!is_physical_mode(vcpu))
+            ia64_set_rr(reg,rrval);
+        break;
     default:
         ia64_set_rr(reg,vrrtomrr(vcpu,val));
         break;